In [2]:
import tensorflow as tf
from keras.datasets import mnist
import cv2
import os
import pathlib
from keras.layers import Conv2D, Conv2DTranspose, Dropout, Dense, Reshape, LayerNormalization, LeakyReLU
from keras import layers, models
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score, classification_report
from sklearn.metrics import f1_score, recall_score, precision_score
/opt/conda/lib/python3.10/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.16.5 and <1.23.0 is required for this version of SciPy (detected version 1.23.5
  warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6StatusC1EN10tensorflow5error4CodeESt17basic_string_viewIcSt11char_traitsIcEENS_14SourceLocationE']
  warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZTVN10tensorflow13GcsFileSystemE']
  warnings.warn(f"file system plugins are not loaded: {e}")
In [3]:
class ReadDataset:
    def __init__(self, datasetpath, labels, image_shape):
        self.datasetpath = datasetpath
        self.labels = labels
        self.image_shape = image_shape
    def returListImages(self,):
        self.images = []
        for label in self.labels:
            self.images.append(list(pathlib.Path(os.path.join(self.datasetpath,
                                                              label)).glob('*.*')))
    def readImages(self,):
        self.returListImages()
        self.finalImages = []
        labels = []
        for label in range(len(self.labels)):
            for img in self.images[label]:
                img = cv2.imread(str(img))
                img = cv2.resize(img , self.image_shape)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img  = img/255
                self.finalImages.append(img)
                labels.append(label)
        images = np.array(self.finalImages)
        labels = np.array(labels)
        return images, labels
In [4]:
readDatasetObject = ReadDataset('/kaggle/input/chest-xray-pneumonia/chest_xray/train',
                               ['NORMAL', 'PNEUMONIA'],
                               (64, 64))
images, labels = readDatasetObject.readImages()
In [5]:
images.shape, labels.shape
Out[5]:
((5216, 64, 64, 3), (5216,))
In [6]:
plt.figure(figsize = (12, 12))
indexs = np.random.randint(0, len(labels), size = (64, ))
for i in range(64):
    plt.subplot(8, 8, (i + 1))
    plt.imshow(images[indexs[i]])
    plt.title(labels[indexs[i]])
plt.legend()
Out[6]:
<matplotlib.legend.Legend at 0x7c63de2e2d40>
In [7]:
class Acgan:
    def __init__(self, eta, batch_size, epochs, weight_decay, latent_space,
                 image_shape, kernel_size):
        self.eta = eta
        self.batch_size = batch_size
        self.epochs = epochs
        self.weight_decay = weight_decay
        self.latent_space = latent_space
        self.image_shape = image_shape
        self.kernel_size = kernel_size
    def data(self, images, labels):
        ytrain = tf.keras.utils.to_categorical(labels)
        self.images = images
        self.labels = ytrain
    def samples(self, G, noize, labels):
        images = G.predict([noize, labels])
        ys = np.argmax(labels, axis = 1)
        plt.figure(figsize = (12, 4))
        for i in range(16):
            plt.subplot(2, 8, (i + 1))
            plt.imshow(images[i], cmap = 'gray')
            plt.title(ys[i])
        plt.show()
    def generator(self, inputs, labels):
        filters = [256, 128, 64, 32]
        padding = 'same'
        x = inputs
        y = labels
        x = layers.concatenate([x, y])
        x = layers.Dense(1024, )(x)
        x = layers.Dense(8*8*filters[0],
                         kernel_regularizer = tf.keras.regularizers.L2(0.001))(x)
        x = layers.Reshape((8, 8, filters[0]))(x)
        for filter in filters:
            if filter >= 64:
                strides = 2
            else:
                strides = 1
            x = LayerNormalization()(x)
            x = layers.Activation('relu')(x)
            x = Conv2DTranspose(filter, kernel_size = self.kernel_size, padding = padding,
                      strides = strides)(x)
        x = Conv2DTranspose(3, kernel_size = self.kernel_size, padding = padding)(x)
        x = layers.Activation('sigmoid')(x)
        self.generatorModel = models.Model(inputs = [inputs, labels],
                                           outputs = x,
                                           name = 'generator')
    def discriminator(self, inputs):
        x = inputs
        filters = [32, 64, 128, 256]
        padding = 'same'
        for filter in filters:
            if filter < 256:
                strides = 2
            else:
                strides = 1
            x = Conv2D(filter, kernel_size = self.kernel_size, padding = padding,
                      strides = strides,
                      kernel_regularizer = tf.keras.regularizers.L2(0.001))(x)
            x = LeakyReLU(alpha = 0.2)(x)
        x = layers.Flatten()(x)
        outputs = Dense(1, )(x)
        labelsOutput = Dense(256,
                             kernel_regularizer = tf.keras.regularizers.L2(0.001))(x)
        labelsOutput = Dropout(0.3)(labelsOutput)
        labelsOutput = Dense(2,)(labelsOutput)
        labelsOutput = layers.Activation('softmax')(labelsOutput)
        self.discriminatorModel = models.Model(inputs = inputs,
                                               outputs = [outputs, labelsOutput],
                                               name = 'discriminator')
    def build(self,):
        generatorInput = layers.Input(shape = (self.latent_space))
        discriminatorInput = layers.Input(shape = (self.image_shape))
        labelsInput = layers.Input(shape = (2, ))
        self.generator(generatorInput, labelsInput)
        self.discriminator(discriminatorInput)
        G = self.generatorModel
        D = self.discriminatorModel
        D.compile(loss = ['mse', 'binary_crossentropy'],
                 optimizer = tf.keras.optimizers.RMSprop(learning_rate = self.eta,
                                                        weight_decay = self.weight_decay))
        D.summary()
        G.summary()
        D.trainable = False
        GAN = models.Model(inputs = [generatorInput, labelsInput],
                           outputs = D(G([generatorInput, labelsInput])))
        GAN.compile(loss = ['mse', 'binary_crossentropy'],
                   optimizer = tf.keras.optimizers.RMSprop(learning_rate = self.eta*0.5,
                                                          weight_decay = self.weight_decay*0.5))
        GAN.summary()
        return G, D, GAN
    def trainAlgorithm(self, G, D, GAN):
        for epoch in range(self.epochs):
            indexs = np.random.randint(0, len(self.images), size = (self.batch_size, ))
            realImages = self.images[indexs]
            realLabels = self.labels[indexs]
            realTag = tf.ones(shape = (self.batch_size, ))
            noize = tf.random.uniform(shape = (self.batch_size,
                                              self.latent_space), minval = -1,
                                     maxval = 1)
            fakeLabels = tf.keras.utils.to_categorical(np.random.choice(range(2), size = (self.batch_size)),
                                                      num_classes = 2)
            fakeImages = tf.squeeze(G.predict([noize, fakeLabels], verbose = 0))
            fakeTag = tf.zeros(shape = (self.batch_size, ))
            allImages = np.vstack([realImages, fakeImages])
            allLabels = np.vstack([realLabels, fakeLabels])
            allTags = np.hstack([realTag, fakeTag])
            _, dlossTag, dlossLabels = D.train_on_batch(allImages, [allTags, allLabels])
            noize = tf.random.uniform(shape = (self.batch_size,
                                              self.latent_space), minval = -1,
                                     maxval = 1)
            _, glossTag, glossLabels = GAN.train_on_batch([noize, fakeLabels], [realTag, fakeLabels])
            if epoch % 5000 == 0:
                print('Epoch: {}'.format(epoch))
                print('discriminator loss: [tag: {}, labels: {}], generator loss: [tag: {}, labels: {}]'.format(dlossTag,
                                                                                                                dlossLabels,
                                                                                                                glossTag,
                                                                                                                glossLabels))
                self.samples(G, noize, fakeLabels)
In [8]:
acgan = Acgan(eta = 0.0001, batch_size = 32, epochs = 32000, weight_decay = 6e-9,
              latent_space = 100, image_shape = (64, 64, 3), kernel_size = 5)
In [9]:
acgan.data(images, labels)
In [10]:
G, D, GAN = acgan.build()
Model: "discriminator"
__________________________________________________________________________________________________
 Layer (type)                   Output Shape         Param #     Connected to                     
==================================================================================================
 input_2 (InputLayer)           [(None, 64, 64, 3)]  0           []                               
                                                                                                  
 conv2d (Conv2D)                (None, 32, 32, 32)   2432        ['input_2[0][0]']                
                                                                                                  
 leaky_re_lu (LeakyReLU)        (None, 32, 32, 32)   0           ['conv2d[0][0]']                 
                                                                                                  
 conv2d_1 (Conv2D)              (None, 16, 16, 64)   51264       ['leaky_re_lu[0][0]']            
                                                                                                  
 leaky_re_lu_1 (LeakyReLU)      (None, 16, 16, 64)   0           ['conv2d_1[0][0]']               
                                                                                                  
 conv2d_2 (Conv2D)              (None, 8, 8, 128)    204928      ['leaky_re_lu_1[0][0]']          
                                                                                                  
 leaky_re_lu_2 (LeakyReLU)      (None, 8, 8, 128)    0           ['conv2d_2[0][0]']               
                                                                                                  
 conv2d_3 (Conv2D)              (None, 8, 8, 256)    819456      ['leaky_re_lu_2[0][0]']          
                                                                                                  
 leaky_re_lu_3 (LeakyReLU)      (None, 8, 8, 256)    0           ['conv2d_3[0][0]']               
                                                                                                  
 flatten (Flatten)              (None, 16384)        0           ['leaky_re_lu_3[0][0]']          
                                                                                                  
 dense_3 (Dense)                (None, 256)          4194560     ['flatten[0][0]']                
                                                                                                  
 dropout (Dropout)              (None, 256)          0           ['dense_3[0][0]']                
                                                                                                  
 dense_4 (Dense)                (None, 2)            514         ['dropout[0][0]']                
                                                                                                  
 dense_2 (Dense)                (None, 1)            16385       ['flatten[0][0]']                
                                                                                                  
 activation_5 (Activation)      (None, 2)            0           ['dense_4[0][0]']                
                                                                                                  
==================================================================================================
Total params: 5,289,539
Trainable params: 5,289,539
Non-trainable params: 0
__________________________________________________________________________________________________
Model: "generator"
__________________________________________________________________________________________________
 Layer (type)                   Output Shape         Param #     Connected to                     
==================================================================================================
 input_1 (InputLayer)           [(None, 100)]        0           []                               
                                                                                                  
 input_3 (InputLayer)           [(None, 2)]          0           []                               
                                                                                                  
 concatenate (Concatenate)      (None, 102)          0           ['input_1[0][0]',                
                                                                  'input_3[0][0]']                
                                                                                                  
 dense (Dense)                  (None, 1024)         105472      ['concatenate[0][0]']            
                                                                                                  
 dense_1 (Dense)                (None, 16384)        16793600    ['dense[0][0]']                  
                                                                                                  
 reshape (Reshape)              (None, 8, 8, 256)    0           ['dense_1[0][0]']                
                                                                                                  
 layer_normalization (LayerNorm  (None, 8, 8, 256)   512         ['reshape[0][0]']                
 alization)                                                                                       
                                                                                                  
 activation (Activation)        (None, 8, 8, 256)    0           ['layer_normalization[0][0]']    
                                                                                                  
 conv2d_transpose (Conv2DTransp  (None, 16, 16, 256)  1638656    ['activation[0][0]']             
 ose)                                                                                             
                                                                                                  
 layer_normalization_1 (LayerNo  (None, 16, 16, 256)  512        ['conv2d_transpose[0][0]']       
 rmalization)                                                                                     
                                                                                                  
 activation_1 (Activation)      (None, 16, 16, 256)  0           ['layer_normalization_1[0][0]']  
                                                                                                  
 conv2d_transpose_1 (Conv2DTran  (None, 32, 32, 128)  819328     ['activation_1[0][0]']           
 spose)                                                                                           
                                                                                                  
 layer_normalization_2 (LayerNo  (None, 32, 32, 128)  256        ['conv2d_transpose_1[0][0]']     
 rmalization)                                                                                     
                                                                                                  
 activation_2 (Activation)      (None, 32, 32, 128)  0           ['layer_normalization_2[0][0]']  
                                                                                                  
 conv2d_transpose_2 (Conv2DTran  (None, 64, 64, 64)  204864      ['activation_2[0][0]']           
 spose)                                                                                           
                                                                                                  
 layer_normalization_3 (LayerNo  (None, 64, 64, 64)  128         ['conv2d_transpose_2[0][0]']     
 rmalization)                                                                                     
                                                                                                  
 activation_3 (Activation)      (None, 64, 64, 64)   0           ['layer_normalization_3[0][0]']  
                                                                                                  
 conv2d_transpose_3 (Conv2DTran  (None, 64, 64, 32)  51232       ['activation_3[0][0]']           
 spose)                                                                                           
                                                                                                  
 conv2d_transpose_4 (Conv2DTran  (None, 64, 64, 3)   2403        ['conv2d_transpose_3[0][0]']     
 spose)                                                                                           
                                                                                                  
 activation_4 (Activation)      (None, 64, 64, 3)    0           ['conv2d_transpose_4[0][0]']     
                                                                                                  
==================================================================================================
Total params: 19,616,963
Trainable params: 19,616,963
Non-trainable params: 0
__________________________________________________________________________________________________
Model: "model"
__________________________________________________________________________________________________
 Layer (type)                   Output Shape         Param #     Connected to                     
==================================================================================================
 input_1 (InputLayer)           [(None, 100)]        0           []                               
                                                                                                  
 input_3 (InputLayer)           [(None, 2)]          0           []                               
                                                                                                  
 generator (Functional)         (None, 64, 64, 3)    19616963    ['input_1[0][0]',                
                                                                  'input_3[0][0]']                
                                                                                                  
 discriminator (Functional)     [(None, 1),          5289539     ['generator[0][0]']              
                                 (None, 2)]                                                       
                                                                                                  
==================================================================================================
Total params: 24,906,502
Trainable params: 19,616,963
Non-trainable params: 5,289,539
__________________________________________________________________________________________________
In [11]:
tf.keras.utils.plot_model(GAN, show_shapes = True)
Out[11]:
In [12]:
tf.keras.utils.plot_model(G, show_shapes = True)
Out[12]:
In [13]:
tf.keras.utils.plot_model(D, show_shapes = True)
Out[13]:
In [14]:
acgan.trainAlgorithm(G, D, GAN)
Epoch: 0
discriminator loss: [tag: 0.5671104788780212, labels: 0.692040741443634], generator loss: [tag: 0.5819922685623169, labels: 0.6864303946495056]
1/1 [==============================] - 0s 21ms/step
Epoch: 5000
discriminator loss: [tag: 0.26543426513671875, labels: 0.03770596161484718], generator loss: [tag: 0.3469369113445282, labels: 0.0006412705988623202]
1/1 [==============================] - 0s 20ms/step
Epoch: 10000
discriminator loss: [tag: 0.24920831620693207, labels: 0.0031062685884535313], generator loss: [tag: 0.2599722146987915, labels: 0.0073606595396995544]
1/1 [==============================] - 0s 24ms/step
Epoch: 15000
discriminator loss: [tag: 0.23986151814460754, labels: 0.020241793245077133], generator loss: [tag: 0.2756638526916504, labels: 0.008874816820025444]
1/1 [==============================] - 0s 20ms/step
Epoch: 20000
discriminator loss: [tag: 0.24258743226528168, labels: 0.0038555630017071962], generator loss: [tag: 0.2774313688278198, labels: 0.0011579369893297553]
1/1 [==============================] - 0s 19ms/step
Epoch: 25000
discriminator loss: [tag: 0.21438449621200562, labels: 0.00024850803310982883], generator loss: [tag: 0.4628139138221741, labels: 0.009382463060319424]
1/1 [==============================] - 0s 25ms/step
Epoch: 30000
discriminator loss: [tag: 0.2039976269006729, labels: 0.000991622800938785], generator loss: [tag: 0.6103872656822205, labels: 8.902423724066466e-05]
1/1 [==============================] - 0s 19ms/step
In [15]:
G.save('/kaggle/working/generator.h5')
In [16]:
G = tf.keras.models.load_model('/kaggle/working/generator.h5')
In [17]:
datasetGenerationSize = 3000
noize = tf.random.uniform(shape = (datasetGenerationSize, 100), minval = -1, maxval = 1)
newlabels = tf.keras.utils.to_categorical(np.random.choice([0, 1], size = (datasetGenerationSize, )), num_classes = 2)
In [18]:
noize.shape, newlabels.shape
Out[18]:
(TensorShape([3000, 100]), (3000, 2))
In [19]:
np.unique(np.argmax(newlabels, axis = 1), return_counts = True)
Out[19]:
(array([0, 1]), array([1490, 1510]))
In [20]:
imagesGeneration = G.predict([noize, newlabels])
imagesGeneration.shape
94/94 [==============================] - 3s 31ms/step
Out[20]:
(3000, 64, 64, 3)
In [21]:
plt.figure(figsize = (12, 12))
t = np.argmax(newlabels, axis = 1)
for i in range(64):
    plt.subplot(8, 8, (i + 1))
    plt.imshow(imagesGeneration[i])
    plt.title(t[i])
plt.legend()
Out[21]:
<matplotlib.legend.Legend at 0x7c634035d0c0>
In [22]:
basemodel = tf.keras.applications.VGG16(weights = None, input_shape = (64, 64, 3),
                                        pooling = 'max', include_top = False)
x = layers.Dropout(0.4)(basemodel.output)
x = layers.Dense(128,)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(alpha = 0.2)(x)
x = layers.Dropout(0.4)(x)
x = layers.Dense(32,)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(alpha = 0.2)(x)
x = layers.Dropout(0.4)(x)
x = layers.Dense(1, activation = 'sigmoid')(x)
m = tf.keras.models.Model(inputs = basemodel.input, outputs = x)
m.compile(loss = 'binary_crossentropy', optimizer = tf.keras.optimizers.Adam(learning_rate = 0.00001))
m.summary()
Model: "model_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 input_4 (InputLayer)        [(None, 64, 64, 3)]       0         
                                                                 
 block1_conv1 (Conv2D)       (None, 64, 64, 64)        1792      
                                                                 
 block1_conv2 (Conv2D)       (None, 64, 64, 64)        36928     
                                                                 
 block1_pool (MaxPooling2D)  (None, 32, 32, 64)        0         
                                                                 
 block2_conv1 (Conv2D)       (None, 32, 32, 128)       73856     
                                                                 
 block2_conv2 (Conv2D)       (None, 32, 32, 128)       147584    
                                                                 
 block2_pool (MaxPooling2D)  (None, 16, 16, 128)       0         
                                                                 
 block3_conv1 (Conv2D)       (None, 16, 16, 256)       295168    
                                                                 
 block3_conv2 (Conv2D)       (None, 16, 16, 256)       590080    
                                                                 
 block3_conv3 (Conv2D)       (None, 16, 16, 256)       590080    
                                                                 
 block3_pool (MaxPooling2D)  (None, 8, 8, 256)         0         
                                                                 
 block4_conv1 (Conv2D)       (None, 8, 8, 512)         1180160   
                                                                 
 block4_conv2 (Conv2D)       (None, 8, 8, 512)         2359808   
                                                                 
 block4_conv3 (Conv2D)       (None, 8, 8, 512)         2359808   
                                                                 
 block4_pool (MaxPooling2D)  (None, 4, 4, 512)         0         
                                                                 
 block5_conv1 (Conv2D)       (None, 4, 4, 512)         2359808   
                                                                 
 block5_conv2 (Conv2D)       (None, 4, 4, 512)         2359808   
                                                                 
 block5_conv3 (Conv2D)       (None, 4, 4, 512)         2359808   
                                                                 
 block5_pool (MaxPooling2D)  (None, 2, 2, 512)         0         
                                                                 
 global_max_pooling2d (Globa  (None, 512)              0         
 lMaxPooling2D)                                                  
                                                                 
 dropout_1 (Dropout)         (None, 512)               0         
                                                                 
 dense_5 (Dense)             (None, 128)               65664     
                                                                 
 batch_normalization (BatchN  (None, 128)              512       
 ormalization)                                                   
                                                                 
 leaky_re_lu_4 (LeakyReLU)   (None, 128)               0         
                                                                 
 dropout_2 (Dropout)         (None, 128)               0         
                                                                 
 dense_6 (Dense)             (None, 32)                4128      
                                                                 
 batch_normalization_1 (Batc  (None, 32)               128       
 hNormalization)                                                 
                                                                 
 leaky_re_lu_5 (LeakyReLU)   (None, 32)                0         
                                                                 
 dropout_3 (Dropout)         (None, 32)                0         
                                                                 
 dense_7 (Dense)             (None, 1)                 33        
                                                                 
=================================================================
Total params: 14,785,153
Trainable params: 14,784,833
Non-trainable params: 320
_________________________________________________________________
In [23]:
history = m.fit(imagesGeneration, np.argmax(newlabels, axis = 1),
                epochs = 30, batch_size = 64,
                validation_split = 0.2,
                callbacks = [tf.keras.callbacks.EarlyStopping(patience = 2, monitor = 'val_loss', mode = 'min',
                                                              restore_best_weights = True)])
Epoch 1/30
38/38 [==============================] - 17s 157ms/step - loss: 0.6555 - val_loss: 0.6922
Epoch 2/30
38/38 [==============================] - 3s 87ms/step - loss: 0.3790 - val_loss: 0.6904
Epoch 3/30
38/38 [==============================] - 3s 84ms/step - loss: 0.2742 - val_loss: 0.6874
Epoch 4/30
38/38 [==============================] - 3s 84ms/step - loss: 0.2183 - val_loss: 0.6832
Epoch 5/30
38/38 [==============================] - 3s 85ms/step - loss: 0.1750 - val_loss: 0.6757
Epoch 6/30
38/38 [==============================] - 3s 85ms/step - loss: 0.1406 - val_loss: 0.6647
Epoch 7/30
38/38 [==============================] - 3s 86ms/step - loss: 0.1280 - val_loss: 0.6498
Epoch 8/30
38/38 [==============================] - 3s 85ms/step - loss: 0.1272 - val_loss: 0.6199
Epoch 9/30
38/38 [==============================] - 3s 87ms/step - loss: 0.1114 - val_loss: 0.5848
Epoch 10/30
38/38 [==============================] - 3s 85ms/step - loss: 0.0927 - val_loss: 0.5391
Epoch 11/30
38/38 [==============================] - 3s 84ms/step - loss: 0.0875 - val_loss: 0.4780
Epoch 12/30
38/38 [==============================] - 3s 84ms/step - loss: 0.0856 - val_loss: 0.4131
Epoch 13/30
38/38 [==============================] - 3s 84ms/step - loss: 0.0702 - val_loss: 0.3332
Epoch 14/30
38/38 [==============================] - 3s 86ms/step - loss: 0.0725 - val_loss: 0.2718
Epoch 15/30
38/38 [==============================] - 3s 86ms/step - loss: 0.0681 - val_loss: 0.2124
Epoch 16/30
38/38 [==============================] - 3s 84ms/step - loss: 0.0673 - val_loss: 0.1573
Epoch 17/30
38/38 [==============================] - 3s 86ms/step - loss: 0.0649 - val_loss: 0.1204
Epoch 18/30
38/38 [==============================] - 3s 83ms/step - loss: 0.0678 - val_loss: 0.1779
Epoch 19/30
38/38 [==============================] - 3s 86ms/step - loss: 0.0635 - val_loss: 0.1326
In [24]:
plt.figure(figsize = (7, 6))
plt.plot(history.history['loss'], label = 'training loss')
plt.plot(history.history['val_loss'], label = 'validation loss')
plt.title('Results obtained while training a neural network on images generated by the neural network')
plt.legend()
Out[24]:
<matplotlib.legend.Legend at 0x7c634056cdf0>
In [25]:
m.evaluate(images, labels)
163/163 [==============================] - 2s 14ms/step - loss: 0.1854
Out[25]:
0.18541084229946136
In [26]:
y_pred = tf.squeeze(m.predict(images))
y_pred.shape
163/163 [==============================] - 3s 13ms/step
Out[26]:
TensorShape([5216])
In [27]:
y_pred = y_pred >= 0.5
y_pred = np.array(y_pred, dtype = 'int32')
y_pred
Out[27]:
array([1, 0, 0, ..., 1, 1, 1], dtype=int32)
In [28]:
accuracy_score(y_pred, labels)*100
Out[28]:
95.39877300613497
In [29]:
print(classification_report(y_pred, labels))
              precision    recall  f1-score   support

           0       0.89      0.93      0.91      1281
           1       0.98      0.96      0.97      3935

    accuracy                           0.95      5216
   macro avg       0.93      0.95      0.94      5216
weighted avg       0.96      0.95      0.95      5216

In [30]:
from sklearn.metrics import confusion_matrix
import seaborn as sns
cm = confusion_matrix(y_pred, labels)
cm
Out[30]:
array([[1191,   90],
       [ 150, 3785]])
In [31]:
import pandas as pd
cmObject = pd.DataFrame(cm , index = ['NORMAL', 'PNEUMONIA'],
                        columns = ['NORMAL', 'PNEUMONIA'])
cmObject.head()
Out[31]:
NORMAL PNEUMONIA
NORMAL 1191 90
PNEUMONIA 150 3785
In [32]:
print('f1_score: {}, recall_score: {}, precision_score: {}'.format(f1_score(y_pred, labels)*100,
                                                                   recall_score(y_pred, labels)*100,
                                                                   precision_score(y_pred, labels)*100))
f1_score: 96.9270166453265, recall_score: 96.18805590851333, precision_score: 97.67741935483872
In [33]:
sns.heatmap(cmObject, annot = True, cmap="Blues")
Out[33]:
<Axes: >
In [ ]: